import tensorflow as tf
#import sys
#from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.models import load_model
import random
# import the UMAP library
import umap.umap_ as umap
from tlcam_layer import ScoreCAM
from functools import reduce
#import pandas as pd
import cv2
import sys
sys.path.append('../QXP/')
from ScoreCAM_QXP import ScoreCAM_QXP
from computeEvalX import EvalX
def get_last_conv_layer_name(model):
    """Return the name of the last Conv2D layer in *model*, or None if it has none."""
    last_name = None
    # Forward scan, remembering the most recent Conv2D seen; equivalent to
    # returning the first match of a reversed scan.
    for layer in model.layers:
        if isinstance(layer, tf.keras.layers.Conv2D):
            last_name = layer.name
    return last_name
def get_conv_layer_name(model, index):
    """Return the name of a Conv2D layer in *model* selected by *index*.

    Parameters
    ----------
    model : Keras model whose layers are scanned in order.
    index : int
        1-based position of the Conv2D layer to fetch (original contract).
        Negative indices count from the end (-1 is the last Conv2D layer),
        generalizing the original -1-only special case.

    Returns
    -------
    str or None
        The layer name, or None when no Conv2D layer matches.
    """
    conv_names = [layer.name
                  for layer in model.layers
                  if isinstance(layer, tf.keras.layers.Conv2D)]
    if not conv_names:
        return None
    if index < 0:
        # Negative indexing from the end; out-of-range -> None instead of
        # wrapping around or raising.
        return conv_names[index] if -index <= len(conv_names) else None
    # Positive indices are 1-based, matching the original counting loop
    # (index == 0 never matched and returned None implicitly).
    if 1 <= index <= len(conv_names):
        return conv_names[index - 1]
    return None
# Load CIFAR-100; only the test split is used here (the training tuple is discarded).
(_,_), (x_test, y_test)=tf.keras.datasets.cifar100.load_data()
# Scale pixel values from [0, 255] integers to [0, 1] floats for the models.
x_test = x_test / 255
def unpickle(file):
    """Load and return the pickled object stored at *file*.

    Used here to read the CIFAR-100 'meta' file, which holds a dict of
    label-name lists.  NOTE: pickle.load on untrusted files is unsafe;
    this is intended only for the local CIFAR-100 archive.
    """
    import pickle
    with open(file, 'rb') as fo:
        # Renamed from 'dict' to avoid shadowing the builtin.
        data = pickle.load(fo, encoding='utf-8')
    return data
# Path to the CIFAR-100 'meta' file (machine-specific; adjust per host).
metadata_path = '/Users/air/Downloads/cifar-100-python_downloadedfromwebsite/meta'
metadata = unpickle(metadata_path)
# Fine-grained (100-class) label names, indexed by class id.
labels = list(metadata['fine_label_names'])
print(labels)
# Output: ['apple', 'aquarium_fish', 'baby', 'bear', 'beaver', 'bed', 'bee', 'beetle', 'bicycle', 'bottle', 'bowl', 'boy', 'bridge', 'bus', 'butterfly', 'camel', 'can', 'castle', 'caterpillar', 'cattle', 'chair', 'chimpanzee', 'clock', 'cloud', 'cockroach', 'couch', 'crab', 'crocodile', 'cup', 'dinosaur', 'dolphin', 'elephant', 'flatfish', 'forest', 'fox', 'girl', 'hamster', 'house', 'kangaroo', 'keyboard', 'lamp', 'lawn_mower', 'leopard', 'lion', 'lizard', 'lobster', 'man', 'maple_tree', 'motorcycle', 'mountain', 'mouse', 'mushroom', 'oak_tree', 'orange', 'orchid', 'otter', 'palm_tree', 'pear', 'pickup_truck', 'pine_tree', 'plain', 'plate', 'poppy', 'porcupine', 'possum', 'rabbit', 'raccoon', 'ray', 'road', 'rocket', 'rose', 'sea', 'seal', 'shark', 'shrew', 'skunk', 'skyscraper', 'snail', 'snake', 'spider', 'squirrel', 'streetcar', 'sunflower', 'sweet_pepper', 'table', 'tank', 'telephone', 'television', 'tiger', 'tractor', 'train', 'trout', 'tulip', 'turtle', 'wardrobe', 'whale', 'willow_tree', 'wolf', 'woman', 'worm']
# NOTE(review): all three paths below are machine-specific; adjust per host.
# Model trained from scratch (no transfer learning).
baseline = load_model("/Users/air/Downloads/PhD/CompletedProjects/TL-CAM/COMPLETE/10_100/models/baseline_model.h5")
# Model trained with TL-CAM using k=50.
tlcam_k50_model = load_model("/Users/air/Downloads/PhD/CompletedProjects/TL-CAM/COMPLETE/10_100/models/cam_k50_model.h5")
# Model trained with plain transfer learning (TL) — original comment was truncated.
tl_model = load_model("/Users/air/Downloads/PhD/CompletedProjects/TL-CAM/COMPLETE/10_100/models/tl_model_model.h5")
# Output: None 50
#print(tl_model.summary())
#print(baseline.evaluate(x_test, tf.keras.utils.to_categorical(y_test, 100), batch_size=16))
#print(tlcam_k50_model.evaluate(x_test, tf.keras.utils.to_categorical(y_test, 100), batch_size=16))
#print(tl_model.evaluate(x_test, tf.keras.utils.to_categorical(y_test, 100), batch_size=32))
# k value of the TL-CAM model under evaluation (used only for the log line below).
threshold = 50
# Restrict to the first 1000 test samples to keep EvalX tractable.
x_test = x_test[0:1000]
y_test = y_test[0:1000]
print("Extracting correct predictions by baseline for EvalX")
predictions_baseline = tf.argmax(baseline.predict(x_test), axis=-1)
indices_correct_preds_baseline = [i for i, pred in enumerate(predictions_baseline) if pred==y_test[i]]
print("Extracting correct predictions by TL-CAM for EvalX with k=", threshold)
predictions_tlcam = tf.argmax(tlcam_k50_model.predict(x_test), axis=-1)
indices_correct_preds_tlcam = [i for i, pred in enumerate(predictions_tlcam) if pred==y_test[i]]
print("Extracting correct predictions by TL for EvalX")
predictions_tl = tf.argmax(tl_model.predict(x_test), axis=-1)
indices_correct_preds_tl = [i for i, pred in enumerate(predictions_tl) if pred==y_test[i]]
print("Creating test set for EvalX")
# Fix: the three-way intersection was computed twice (once for x, once for y);
# compute it once and use NumPy fancy indexing instead of list comprehensions.
common_indices = reduce(np.intersect1d, (indices_correct_preds_baseline,
                                         indices_correct_preds_tlcam,
                                         indices_correct_preds_tl))
x_test_evalx = x_test[common_indices]
y_test_evalx = y_test[common_indices]
print("Done")
# Output: Extracting correct predictions by baseline for EvalX / Extracting correct predictions by TL-CAM for EvalX with k= 50 / Extracting correct predictions by TL for EvalX / Creating test set for EvalX / Done
# Alias the EvalX subset for the visualisation cells below.
subset_x, subset_y = x_test_evalx, y_test_evalx
print(len(subset_x))
# Output: 333
# Loading sample images
x_test = subset_x
y_test = subset_y
#index = random.randint(0, x_test.shape[0])
# Index for the motorcycle sample used in the manuscript is 209.
index = 209
print("index", index)
image = x_test[index]
# Visualizing the selected sample.
plt.imshow(image)
plt.axis('off')
plt.tight_layout()
plt.show()
# Preparing: add a leading batch dimension so model.predict accepts the image.
X = tf.expand_dims(image, axis=0)
#X = subset_x
print("True label : ",y_test[index], labels[int(y_test[index])])
print("\n")
# Fix: each model was predicted twice per line (once for the tensor print,
# once for the label lookup); predict once per model and reuse the result.
pred = tf.argmax(baseline.predict(X), axis=-1)
print("baseline prediction:", pred, labels[int(pred)])
pred = tf.argmax(tlcam_k50_model.predict(X), axis=-1)
print("TL-CAM k=50 prediction:", pred, labels[int(pred)])
pred = tf.argmax(tl_model.predict(X), axis=-1)
print("TL prediction:", pred, labels[int(pred)])
# Output: index 209
# Output: True label : [48] motorcycle / baseline prediction: tf.Tensor([48], shape=(1,), dtype=int64) motorcycle / TL-CAM k=50 prediction: tf.Tensor([48], shape=(1,), dtype=int64) motorcycle / TL prediction: tf.Tensor([48], shape=(1,), dtype=int64) motorcycle
# Score-CAM QXP on the baseline model, explained at its last conv layer.
baseline_scorecam = ScoreCAM_QXP(baseline, penultimate_layer=get_conv_layer_name(baseline, -1), threshold=None)
print("Baseline QXP")
baseline_qxp, _ = baseline_scorecam(X, quantitative_explanation = True, labels = labels, activation_modifier = True)
pred_baseline = baseline.predict(X)[0]
# Rank classes by descending score and print the top 10 names.
ranked = np.argsort(-pred_baseline)
top10 = ranked[:10]
print(" ".join(f"{labels[i]}," for i in top10), end="")
# Output: Baseline QXP
# Output: motorcycle, tractor, sunflower, butterfly, lawn_mower, bee, crab, bicycle, caterpillar, lizard,
# EvalX metrics for the baseline model's explanation of this sample.
baseline_eval = EvalX()(X, baseline, penultimate_layer=get_conv_layer_name(baseline, -1), threshold=None, ground_truths=tf.keras.utils.to_categorical(y_test[index], 100))
avg_kl, avg_jd, avg_qxp = baseline_eval
for metric_name, metric_value in (('Avg. Kullback–Leibler divergence:', avg_kl),
                                  ('Avg. Jaccard Distance:', avg_jd),
                                  ('Avg. QXP predictions:', avg_qxp)):
    print(metric_name, metric_value)
# Output: Avg. Kullback–Leibler divergence: [1.7609878105613013] / Avg. Jaccard Distance: [0.6666666666666667] / Avg. QXP predictions: [8]
#Visualizing multiple inputs
#f, ax = plt.subplots(nrows=1, ncols=len(X), figsize=(12, 4))
#for i in range(len(X)):
# ax[i].imshow(X[i])
# ax[i].axis('off')
#plt.tight_layout()
#plt.show()
#f, ax = plt.subplots(nrows=1, ncols=len(baseline_qxp), figsize=(12, 4))
#for i in range(len(baseline_qxp)):
# ax[i].imshow(X[i])
# ax[i].imshow(baseline_qxp[i], cmap='jet', alpha=0.5)
# ax[i].axis('off')
#plt.tight_layout()
#plt.show()
# Score-CAM QXP on the TL-CAM (k=50) model at its last conv layer.
tlcam_scorecam = ScoreCAM_QXP(tlcam_k50_model, penultimate_layer=get_conv_layer_name(tlcam_k50_model, -1), threshold=None)
print("TL-CAM k=50 QXP")
tlcam_qxp, _ = tlcam_scorecam(X, quantitative_explanation = True, labels = labels, activation_modifier = True)
pred_tlcam = tlcam_k50_model.predict(X)[0]
# Rank classes by descending score and print the top 10 names.
ranked = np.argsort(-pred_tlcam)
print(" ".join(f"{labels[i]}," for i in ranked[:10]), end="")
# Output: TL-CAM k=50 QXP
# Output: motorcycle, bee, lawn_mower, sunflower, tractor, butterfly, caterpillar, can, beetle, bottle,
#Visualizing
#f, ax = plt.subplots(nrows=1, ncols=len(X), figsize=(12, 4))
#for i in range(len(X)):
# ax[i].imshow(X[i])
# ax[i].axis('off')
#plt.tight_layout()
#plt.show()
#f, ax = plt.subplots(nrows=1, ncols=len(tlcam_qxp), figsize=(12, 4))#
#for i in range(len(tlcam_qxp)):
# ax[i].imshow(X[i])
# ax[i].imshow(tlcam_qxp[i], cmap='jet', alpha=0.5)
# ax[i].axis('off')
#plt.tight_layout()
#plt.show()
# EvalX metrics for the TL-CAM (k=50) model's explanation of this sample.
tlcam_eval = EvalX()(X, tlcam_k50_model, penultimate_layer=get_conv_layer_name(tlcam_k50_model, -1), threshold=None, ground_truths=tf.keras.utils.to_categorical(y_test[index], 100))
avg_kl, avg_jd, avg_qxp = tlcam_eval
for metric_name, metric_value in (('Avg. Kullback–Leibler divergence:', avg_kl),
                                  ('Avg. Jaccard Distance:', avg_jd),
                                  ('Avg. QXP predictions:', avg_qxp)):
    print(metric_name, metric_value)
# Output: Avg. Kullback–Leibler divergence: [0.5212969236332861] / Avg. Jaccard Distance: [0.0] / Avg. QXP predictions: [4]
# Score-CAM QXP on the plain TL model at its last conv layer.
tl_scorecam = ScoreCAM_QXP(tl_model, penultimate_layer=get_conv_layer_name(tl_model, -1), threshold=None)
print("TL QXP")
tl_qxp,_= tl_scorecam(X, quantitative_explanation = True, labels = labels, activation_modifier = True)
pred_tl = tl_model.predict(X)[0]
# Rank classes by descending score and print the top 10 names.
ranked = np.argsort(-pred_tl)
print(" ".join(f"{labels[i]}," for i in ranked[:10]), end="")
# Output: TL QXP
# Output: motorcycle, bicycle, lawn_mower, tractor, butterfly, snake, lobster, bee, pickup_truck, tank,
# EvalX metrics for the TL model's explanation of this sample.
tl_eval = EvalX()(X, tl_model, penultimate_layer=get_conv_layer_name(tl_model, -1), threshold=None, ground_truths=tf.keras.utils.to_categorical(y_test[index], 100))
avg_kl, avg_jd, avg_qxp = tl_eval
for metric_name, metric_value in (('Avg. Kullback–Leibler divergence:', avg_kl),
                                  ('Avg. Jaccard Distance:', avg_jd),
                                  ('Avg. QXP predictions:', avg_qxp)):
    print(metric_name, metric_value)
# Output: Avg. Kullback–Leibler divergence: [0.18388527877013738] / Avg. Jaccard Distance: [0.5] / Avg. QXP predictions: [9]
# Same TL-model QXP, but with threshold=64 passed to ScoreCAM_QXP.
tl_scorecam64 = ScoreCAM_QXP(tl_model, penultimate_layer=get_conv_layer_name(tl_model, -1), threshold=64)
print("TL QXP")
tl_qxp,_= tl_scorecam64(X, quantitative_explanation = True, labels = labels, activation_modifier = True)
pred_tl = tl_model.predict(X)[0]
# Rank classes by descending score and print the top 10 names.
ranked = np.argsort(-pred_tl)
print(" ".join(f"{labels[i]}," for i in ranked[:10]), end="")
# Output: TL QXP
# Output: motorcycle, bicycle, lawn_mower, tractor, butterfly, snake, lobster, bee, pickup_truck, tank,
# EvalX metrics for the TL model with threshold=64.
tl_eval64 = EvalX()(X, tl_model, penultimate_layer=get_conv_layer_name(tl_model, -1), threshold=64, ground_truths=tf.keras.utils.to_categorical(y_test[index], 100))
avg_kl, avg_jd, avg_qxp = tl_eval64
for metric_name, metric_value in (('Avg. Kullback–Leibler divergence:', avg_kl),
                                  ('Avg. Jaccard Distance:', avg_jd),
                                  ('Avg. QXP predictions:', avg_qxp)):
    print(metric_name, metric_value)
# Output: Avg. Kullback–Leibler divergence: [0.18859116980754997] / Avg. Jaccard Distance: [0.5714285714285714] / Avg. QXP predictions: [5]
#Visualizing
#f, ax = plt.subplots(nrows=1, ncols=len(X), figsize=(12, 4))
#for i in range(len(X)):
# ax[i].imshow(X[i])
# ax[i].axis('off')
#plt.tight_layout()
#plt.show()
#f, ax = plt.subplots(nrows=1, ncols=len(tl_qxp), figsize=(12, 4))
#for i in range(len(tl_qxp)):
# ax[i].imshow(X[i])
# ax[i].imshow(tl_qxp[i], cmap='jet', alpha=0.5)
# ax[i].axis('off')
#plt.tight_layout()
#plt.show()
# Display size: CIFAR-100 native resolution.
w, h = 32, 32
# Show the original image.
plt.imshow(cv2.resize(image, (w, h)))
plt.axis('off')
plt.tight_layout()
plt.show()
# Show the Score-CAM heatmaps of the three models side by side.
scorecams = [baseline_qxp[0], tlcam_qxp[0], tl_qxp[0]]
image_titles = ['Baseline', 'Tl-CAM K=50', 'TL_4']
f, ax = plt.subplots(nrows=1, ncols=len(scorecams), figsize=(12, 4))
for axis, title, cam in zip(ax, image_titles, scorecams):
    axis.set_title(title, fontsize=16)
    axis.imshow(cv2.resize(image, (w, h)))
    # Overlay the heatmap on the image with 50% transparency.
    axis.imshow(cv2.resize(cam.numpy(), (w, h)), cmap='jet', alpha=0.5)
    axis.axis('off')
plt.tight_layout()
#plt.savefig('maps.jpg', dpi=300)
plt.show()
# Test with four randomly drawn samples.
# Loading sample images
x_test = subset_x
y_test = subset_y
# Fix: random.randint(a, b) is inclusive on BOTH ends, so randint(0, len(x_test))
# could return len(x_test) and raise IndexError; randrange excludes the stop value.
index1 = random.randrange(len(x_test))
index2 = random.randrange(len(x_test))
index3 = random.randrange(len(x_test))
index4 = random.randrange(len(x_test))
image1 = x_test[index1]
image2 = x_test[index2]
image3 = x_test[index3]
image4 = x_test[index4]
images = np.asarray([np.array(image1), np.array(image2), np.array(image3), np.array(image4)])
# Visualizing the four drawn samples.
f, ax = plt.subplots(nrows=1, ncols=4, figsize=(12, 4))
for i in range(len(images)):
    ax[i].imshow(images[i])
    ax[i].axis('off')
plt.tight_layout()
plt.show()
# Preparing: stack the four images into a single batch tensor.
X = tf.convert_to_tensor(images)
print("True label 1: ",y_test[index1], labels[int(y_test[index1])])
print("True label 2: ",y_test[index2], labels[int(y_test[index2])])
print("True label 3: ",y_test[index3], labels[int(y_test[index3])])
# Fix: the fourth sample was mislabelled as "True label 3" in its print line.
print("True label 4: ",y_test[index4], labels[int(y_test[index4])])
print("\n")
# Per-sample argmax predictions for each of the three models.
print("baseline prediction:", tf.argmax(baseline.predict(X), axis=1))
print("TL-CAM k=50 prediction:", tf.argmax(tlcam_k50_model.predict(X), axis=1))
print("TL prediction:", tf.argmax(tl_model.predict(X), axis=1))
# Output: True label 1: [42] leopard / True label 2: [10] bowl / True label 3: [25] couch / True label 3: [24] cockroach / baseline prediction: tf.Tensor([42 10 25 24], shape=(4,), dtype=int64) / TL-CAM k=50 prediction: tf.Tensor([42 10 25 24], shape=(4,), dtype=int64) / TL prediction: tf.Tensor([42 10 25 24], shape=(4,), dtype=int64)
# Score-CAM QXP on the TL model for the whole batch, with threshold=10.
tl_scorecam = ScoreCAM_QXP(tl_model, penultimate_layer=get_conv_layer_name(tl_model, -1), threshold=10)
tl_qxp,_ = tl_scorecam(X, quantitative_explanation = True, labels = labels, activation_modifier = True)
pred_tl_model = tl_model.predict(X)
sorted_indices = np.argsort(-pred_tl_model)
# For every sample, print its top-10 class names on one line.
for ranking in sorted_indices:
    top_names = [f"{labels[i]}," for i in ranking[:10]]
    print(" ".join(top_names))
# Output: leopard, tiger, wolf, raccoon, mouse, kangaroo, lion, spider, squirrel, fox, / bowl, bus, clock, pickup_truck, can, plate, television, streetcar, train, snake, / couch, bed, table, tank, chair, television, pickup_truck, trout, rabbit, bowl, / cockroach, beetle, spider, crab, lobster, bee, lizard, dinosaur, table, mouse,
# Visualizing the QXP heatmaps overlaid on their source images.
# Fix: derive the column count from the data instead of hard-coding 4, and use
# squeeze=False so indexing works even if only one map is produced.
f, ax = plt.subplots(nrows=1, ncols=len(tl_qxp), figsize=(12, 4), squeeze=False)
for i in range(len(tl_qxp)):
    ax[0, i].imshow(images[i])
    # Heatmap over image with 50% transparency.
    ax[0, i].imshow(tl_qxp[i], cmap='jet', alpha=0.5)
    ax[0, i].axis('off')
plt.tight_layout()
plt.show()